{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "2.3.0\n" ] } ], "source": [ "import os\n", "import shutil\n", "import time\n", "\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "import tensorflow as tf\n", "\n", "from tensorflow.keras import Sequential\n", "from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard\n", "from tensorflow.keras.layers import Dense, Flatten, Softmax, Conv2D, Dropout, MaxPooling2D\n", "\n", "print(tf.__version__)" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "mnist = tf.keras.datasets.mnist.load_data()\n", "(x_train, y_train), (x_test, y_test) = mnist" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Image height x width is 28 x 28\n", "There are 10 classes\n" ] } ], "source": [ "HEIGHT, WIDTH = x_train[0].shape\n", "NCLASSES = tf.size(tf.unique(y_train).y)\n", "print(\"Image height x width is\", HEIGHT, \"x\", WIDTH)\n", "tf.print(\"There are\", NCLASSES, \"classes\")" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "def get_model():\n", " \n", " model = Sequential([\n", " Conv2D(64, kernel_size=3,\n", " activation='relu', input_shape=(WIDTH, HEIGHT, 1)),\n", " MaxPooling2D(2),\n", " Conv2D(32, kernel_size=3,\n", " activation='relu'),\n", " MaxPooling2D(2),\n", " Flatten(),\n", " Dense(400, activation='relu'),\n", " Dense(100, activation='relu'),\n", " Dropout(.25),\n", " Dense(10),\n", " Softmax()\n", " ])\n", "\n", " model.compile(optimizer='adam',\n", " loss='categorical_crossentropy',\n", " metrics=['accuracy'])\n", " \n", " return model" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "BUFFER_SIZE = 5000\n", "BATCH_SIZE = 100\n", "\n", "def scale(image, label):\n", " image = tf.cast(image, tf.float32)\n", " image /= 255\n", " image = tf.expand_dims(image, -1)\n", " return image, label\n", "\n", "\n", "def load_dataset(training=True):\n", " \"\"\"Loads MNIST dataset into a tf.data.Dataset\"\"\"\n", " (x_train, y_train), (x_test, y_test) = mnist\n", " x = x_train if training else x_test\n", " y = y_train if training else y_test\n", " # One-hot encode the classes\n", " y = tf.keras.utils.to_categorical(y, NCLASSES)\n", " dataset = tf.data.Dataset.from_tensor_slices((x, y))\n", " dataset = dataset.map(scale).batch(BATCH_SIZE)\n", " if training:\n", " dataset = dataset.shuffle(BUFFER_SIZE).repeat()\n", " return dataset" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/10\n", "WARNING:tensorflow:From /usr/local/lib/python3.8/dist-packages/tensorflow/python/ops/summary_ops_v2.py:1277: stop (from tensorflow.python.eager.profiler) is deprecated and will be removed after 2020-07-01.\n", "Instructions for updating:\n", "use `tf.profiler.experimental.stop` instead.\n", "\n", "Epoch 00001: saving model to mnist_digits/\n", "100/100 - 38s - loss: 0.6817 - accuracy: 0.7822 - val_loss: 0.1847 - val_accuracy: 0.9444\n", "Epoch 2/10\n", "\n", "Epoch 00002: saving model to mnist_digits/\n", "100/100 - 37s - loss: 0.1862 - accuracy: 0.9419 - val_loss: 0.1023 - val_accuracy: 0.9672\n", "Epoch 3/10\n", "\n", "Epoch 00003: saving model to mnist_digits/\n", "100/100 - 37s - loss: 0.1381 - accuracy: 0.9597 - val_loss: 0.0983 - val_accuracy: 
0.9671\n", "Epoch 4/10\n", "\n", "Epoch 00004: saving model to mnist_digits/\n", "100/100 - 37s - loss: 0.1012 - accuracy: 0.9707 - val_loss: 0.0602 - val_accuracy: 0.9802\n", "Epoch 5/10\n", "\n", "Epoch 00005: saving model to mnist_digits/\n", "100/100 - 37s - loss: 0.0755 - accuracy: 0.9770 - val_loss: 0.0698 - val_accuracy: 0.9776\n", "Epoch 6/10\n", "\n", "Epoch 00006: saving model to mnist_digits/\n", "100/100 - 37s - loss: 0.0782 - accuracy: 0.9766 - val_loss: 0.0413 - val_accuracy: 0.9869\n", "Epoch 7/10\n", "\n", "Epoch 00007: saving model to mnist_digits/\n", "100/100 - 37s - loss: 0.0638 - accuracy: 0.9804 - val_loss: 0.0397 - val_accuracy: 0.9866\n", "Epoch 8/10\n", "\n", "Epoch 00008: saving model to mnist_digits/\n", "100/100 - 37s - loss: 0.0599 - accuracy: 0.9830 - val_loss: 0.0405 - val_accuracy: 0.9868\n", "Epoch 9/10\n", "\n", "Epoch 00009: saving model to mnist_digits/\n", "100/100 - 37s - loss: 0.0602 - accuracy: 0.9818 - val_loss: 0.0378 - val_accuracy: 0.9879\n", "Epoch 10/10\n", "\n", "Epoch 00010: saving model to mnist_digits/\n", "100/100 - 37s - loss: 0.0546 - accuracy: 0.9838 - val_loss: 0.0414 - val_accuracy: 0.9867\n", "training took: 390.4977 secs.\n" ] } ], "source": [ "NUM_EPOCHS = 10\n", "STEPS_PER_EPOCH = 100\n", "\n", "model = get_model()\n", "train_data = load_dataset()\n", "validation_data = load_dataset(training=False)\n", "\n", "OUTDIR = \"mnist_digits/\"\n", "checkpoint_callback = ModelCheckpoint(\n", " OUTDIR, save_weights_only=True, verbose=1)\n", "tensorboard_callback = TensorBoard(log_dir=OUTDIR)\n", "t1 = time.perf_counter()\n", "history = model.fit(\n", " train_data, \n", " validation_data=validation_data,\n", " epochs=NUM_EPOCHS, \n", " steps_per_epoch=STEPS_PER_EPOCH,\n", " verbose=2,\n", " callbacks=[checkpoint_callback, tensorboard_callback]\n", ")\n", "t2 = time.perf_counter()\n", "print(\"training took: {:4.4f} secs.\".format(t2 - t1))" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "data": { "text/html": [ "\n", "